home
***
CD-ROM
|
disk
|
FTP
|
other
***
search
/
Language/OS - Multiplatform Resource Library
/
LANGUAGE OS.iso
/
pcr
/
pcr4_4.lha
/
DIST
/
threads
/
ThreadsUIOMMap.c
< prev
next >
Wrap
C/C++ Source or Header
|
1991-09-10
|
18KB
|
794 lines
/* begincopyright
Copyright (c) 1988 Xerox Corporation. All rights reserved.
Use and copying of this software and preparation of derivative works based
upon this software are permitted. Any distribution of this software or
derivative works must comply with all applicable United States export
control laws. This software is made available AS IS, and Xerox Corporation
makes no warranty about the software, its performance or its conformity to
any specification. Any person obtaining a copy of this software is requested
to send their name and post office or electronic mail address to:
PCR Coordinator
Xerox PARC
3333 Coyote Hill Rd.
Palo Alto, CA 94304
endcopyright */
/*
* ThreadsUIOMMap.c
*
* Demers, November 12, 1990 2:35:23 pm PST
* Boehm, August 16, 1991 10:47:45 am PDT
*
* Unix MMap simulation (mapped files, frame buffers, etc),
* for Xerox Runtime threads package.
*/
#include "xr/Threads.h"
#include "xr/ThreadsBackdoor.h"
#include "xr/ThreadsSharedMem.h"
#include "xr/ThreadsMsgPrivate.h"
#include "xr/UIO.h"
#include "xr/UIOPrivate.h"
#include "xr/Errno.h"
#include "xr/ThreadsSchedCtl.h"
/*
* UNIX (SunOS 4.0) dependent memory management ...
*/
#include <sys/types.h>
#include <sys/file.h>
#include <sys/mman.h>
/*
* includes for sending descriptors
*/
#include <sys/uio.h>
#include <sys/socket.h>
#define DEBUG_MMAP 0
/*
* MMap / MUnmap / MProtect / MCtl
*/
/* Argument block shared by the MMap / MUnmap / MProtect / MCtl order procs.
   A pointer to one of these is carried (cast to unsigned) in the vpo_mma /
   iopo_mma fields of VP and IOP orders.  For MCtl, mma_prot and mma_flags
   are reused under the macro aliases mma_function / mma_arg (see below). */
typedef struct XR_MMArgsRep {
XR_Pointer mma_addr;    /* page-aligned start address */
unsigned mma_len;       /* length in bytes */
int mma_prot;           /* protection bits (or MCtl function) */
int mma_flags;          /* mmap flags (or MCtl arg) */
int mma_fd;             /* descriptor; (-1) until filled in from the fdc */
unsigned mma_off;       /* file offset */
bool mma_include_iops; /* Apply Mprotect to IOPs. Assumed true */
/* for MMap / MUnmap. */
/* Per-processor outcome: 0 or -errno.  VPs occupy [0, XR_MAX_VPS),
   IOPs follow at XR_MAX_VPS + index. */
int mma_results[XR_MAX_VPS+XR_MAX_IOPS];
} * XR_MMArgs;
/* where to put a result ...
   Index of the calling processor's slot in mma_results: a VP uses its
   vpe_index; an IOP (XR_vpe == NIL) uses XR_MAX_VPS + its iope_index. */
#define ResultIndex() ((XR_vpe != NIL) \
? XR_vpe->vpe_index \
: XR_MAX_VPS + XR_iope->iope_index )
/* VP order issuer (runs in IOP).
 *
 * Broadcasts proc to all VPs as a VP order carrying mma, then scans the
 * per-VP result slots.  Returns the first nonzero (-errno) result found,
 * or 0 if every VP succeeded.
 *
 * NOTE(review): only the VP slots [0, XR_maxVPs) are scanned here; the
 * IOP slots are reported through the IOP order path instead.
 */
static int /* -errno */
XR_MemIOPIssueVPOrder (mma, proc)
XR_MMArgs mma;
void (*proc)(/* XR_VPOrder vpo */);
{
int i, ans;
/* Stash the argument block where the VP order proc can retrieve it. */
XR_iope->iope_vpOrderBuf.vpo_mma = ((unsigned)(mma));
XR_IssueVPOrder(
/*order*/ &(XR_iope->iope_vpOrderBuf),
/*proc*/ proc,
/*stop*/ FALSE
);
for( i = 0; i < XR_maxVPs; i++ ) {
if( (ans = mma->mma_results[i]) != 0 ) return ans;
}
return 0;
}
/* mmap primitives */
/*
 * Sanity-check an (addr, len) range before handing it to mmap.
 *
 * The only live check is page alignment of addr; the heap-overlap test
 * is compiled out (see UNDEFINED below) because some old code still
 * calls mmap inside heap objects.  `len` is unused by the live check.
 *
 * Fix: `limit` was declared unconditionally but only used inside the
 * #ifdef UNDEFINED region, producing an unused-variable warning in
 * normal builds; its declaration now lives inside that region.
 */
static bool
XR_ValidAddressRangeForMMap(addr, len)
XR_Pointer addr;
unsigned len;
{
    if( addr != XR_ComputeAddress(addr, 0, XR_ROUND_DOWN) ) {
        /* not page-aligned */
        return FALSE;
    }
#ifdef UNDEFINED
    {
        XR_Pointer limit;

        limit = XR_sysArea->sa_heapLimit;
        if( (limit == 0) /* heap limit not yet set */
            || (addr < limit) /* heap might grow to here */ ) {
#   ifdef UNDEFINED
            /* can't do this check yet, because some old code still
               calls mmap inside a pointerfree_new'd object, but ... */
            return FALSE;
#   else
            XR_ConsoleMsg(
                "%? ValidAddressRangeForMMap addr 0x%x len %d lim 0x%x\n",
                addr, len, limit );
#   endif
        }
    }
#endif
    return TRUE;
}
/*
 * Perform the mmap(2) call described by mma on the calling VP or IOP.
 * fd, if >= 0, overrides mma->mma_fd (IOPs pass the descriptor copy
 * they received over the order socket).  mmap must return exactly
 * mma_addr (XR_MMap requires MAP_FIXED); any other result counts as
 * failure.  Stores 0 or -errno in this processor's mma_results slot
 * and returns that value.
 */
static int /* -errno */
XR_DoMMapProc (mma, fd)
XR_MMArgs mma;
int fd; /* if >= 0, overrides mma->mma_fd */
{
XR_Pointer mmapAns;
int theResult = 0;
#if DEBUG_MMAP
XR_ConsoleMsg("MMapVPOrder addr 0x%x len %d prot 0x%x flags 0x%x\n",
mma->mma_addr, mma->mma_len, mma->mma_prot, mma->mma_flags);
XR_ConsoleMsg(" ... mma_fd %d fd %d off 0x%x\n",
mma->mma_fd, fd, mma->mma_off);
#endif
if( fd < 0 ) fd = mma->mma_fd;
mmapAns = (XR_Pointer) mmap(
mma->mma_addr,
mma->mma_len,
mma->mma_prot,
mma->mma_flags,
fd,
mma->mma_off
);
if( mmapAns != mma->mma_addr ) {
/* negated so that 0 always means success */
theResult = -errno;
# if DEBUG_MMAP
XR_ConsoleMsg("%? DoMMapProc mmap ans 0x%x errno %d\n",
mmapAns, -theResult);
XR_ConsoleMsg(
"addr 0x%x len %d prot 0x%x flags 0x%x fd %d off %d\n",
mma->mma_addr, mma->mma_len, mma->mma_prot, mma->mma_flags,
fd, mma->mma_off);
# endif
}
mma->mma_results[ResultIndex()] = theResult;
return theResult;
}
/* MMap VP Procs */
/*
 * MMap VP order proc: runs on each VP; maps using mma->mma_fd.
 */
void
XR_MMapVPOrderProc (vpo)
XR_VPOrder vpo;
{
    /* Must be executing on a VP, not an IOP. */
    if( XR_vpe == NIL ) XR_Panic("MMapVPOrderProc 0");
    /* fd of -1 tells DoMMapProc to use mma->mma_fd. */
    (void) XR_DoMMapProc( (XR_MMArgs)(vpo->vpo_mma), (-1) );
}
/* MMap IOP Procs */
/*
 * Receive this IOP's copy of the descriptor being mmap'ed, sent over
 * XR_socket0FD via BSD access rights (msg_accrights).  The one-int
 * iov carries no meaningful data.  Returns the new descriptor, or
 * -errno on failure (including -EINVAL when the message arrived
 * without a descriptor attached).
 */
static int /* fd or -errno */
XR_MMapIOPRecvDescriptor ()
{
struct iovec iov[1];
int fd;
struct msghdr h;
int ans;
int buf; /* Dummy to circumvent recvmsg bug */
fd = (-1); /* sentinel: detects a message carrying no descriptor */
h.msg_name = NIL;
h.msg_namelen = 0;
iov[0].iov_base = (caddr_t)(&buf);
iov[0].iov_len = sizeof (int);
h.msg_iov = iov;
h.msg_iovlen = 1;
h.msg_accrights = ((caddr_t)(&fd));
h.msg_accrightslen = (sizeof fd);
ans = recvmsg(XR_socket0FD, &h, 0); /* XR_ProtectSysCall not required. */
if( ans < 0 ) {
int theError = errno;
# ifdef UNDEFINED
XR_ConsoleMsg("%? errno %d\n", theError);
XR_Panic("MMapIOPRecvDescriptor 0");
# endif
return (-theError);
}
if( fd < 0 ) {
/* recvmsg succeeded but no descriptor was delivered */
# ifdef UNDEFINED
XR_Panic("MMapIOPRecvDescriptor 0");
# endif
return (-EINVAL);
}
return fd;
}
/*
 * MMap IOP order proc: runs once in each IOP.  Receives this IOP's
 * copy of the descriptor, maps it, and closes the copy (the mapping
 * survives the close).  IOP 0 additionally re-issues the order to all
 * VPs and folds their results in.  The combined status (0 or -errno)
 * goes into iopo_results[0].
 */
void
XR_MMapIOPOrderProc (iopo)
XR_IOPOrder iopo;
{
int ans, d;
XR_MMArgs mma = ((XR_MMArgs)(iopo->iopo_mma));
d = XR_MMapIOPRecvDescriptor();
if( d >= 0 ) {
ans = XR_DoMMapProc (mma, d);
(void) close(d);
} else {
ans = d; /* -errno from the receive */
}
if( (ans == 0) && (XR_iope->iope_index == 0) ) {
/* only one IOP forwards the order to the VPs */
ans = XR_MemIOPIssueVPOrder(mma, XR_MMapVPOrderProc);
}
iopo->iopo_results[0] = ((unsigned)(ans));
XR_UIONotifyIOPODone(iopo);
}
/* MMap Exported to UIO.h */
/*
 * Local half of the MMap order, invoked by XR_IssueIOPOrders in the
 * issuing process: pushes the file descriptor to the IOP side through
 * XR_socket1FD using BSD access rights (matching the recvmsg in
 * XR_MMapIOPRecvDescriptor).  Panics if the send fails.
 */
static void
XR_MMapSendDescriptorLocalProc(order, iope)
XR_IOPOrder order;
XR_IOPE iope;
{
XR_MMArgs mma;
int fd;
struct iovec iov[1];
struct msghdr h;
int ans;
int buf; /* Dummy to circumvent recvmsg bug */
mma = ((XR_MMArgs)(order->iopo_mma));
fd = mma->mma_fd;
h.msg_name = NIL;
h.msg_namelen = 0;
iov[0].iov_base = (caddr_t)(&buf);
iov[0].iov_len = sizeof (int);
h.msg_iov = iov; /* not really used ... */
h.msg_iovlen = 1;
h.msg_accrights = ((caddr_t)(&fd));
h.msg_accrightslen = (sizeof fd);
ans = sendmsg( XR_socket1FD, &h, 0 );
if( ans < 0 ) {
int theError = XR_GetErrno();
XR_ConsoleMsg( "%? sendmsg errno %d socket1FD %d fd %d\n",
theError, XR_socket1FD, fd );
XR_Panic("MMapSendDescriptorLocalProc 0");
}
}
/*
 * FDE/FDC worker for XR_MMap (signature supplied by the
 * XR_FDE_FDC_WORKER macro; fdc and x1 are its parameters).  x1 is the
 * XR_MMArgs block built by XR_MMap; the real descriptor index comes
 * from the fdc.  Issues IOP orders, with the local proc shipping each
 * IOP its own descriptor copy.  Returns 0 on success, or -1 with
 * errno set when the combined order result was a -errno.
 */
static XR_FDE_FDC_WORKER(XR_MMapWorker)
{
XR_MMArgs mma = ((XR_MMArgs)(x1));
struct XR_IOPOrderRep iopo;
XR_IOPOResult iopoResult;
int ans;
mma->mma_fd = fdc->fdc_index;
iopo.iopo_mma = ((unsigned)(mma));
iopoResult = XR_IssueIOPOrders(
/*order*/ &iopo,
/*proc*/ XR_MMapIOPOrderProc,
/*localProc*/ XR_MMapSendDescriptorLocalProc
);
if( iopoResult != XR_IOPO_RESULT_OK ) {
XR_Panic("MMapWorker 0");
}
ans = ((int)(iopo.iopo_results[0]));
if( ans < 0 ) {
XR_SetErrno(-ans);
return (-1);
}
return ans;
}
/*
 * Exported mmap analogue (see UIO.h).  Requires MAP_SHARED|MAP_FIXED
 * in flags and a page-aligned addr; otherwise fails with EINVAL.  The
 * mapping is established on every IOP and VP via the order machinery.
 * If anything fails after orders were issued, the partial mapping is
 * torn down with XR_MUnmap before returning.
 *
 * Returns addr on success, or -1 with errno set.  NOTE(review): the
 * address travels back through an int return -- fine on the 32-bit
 * SunOS this targets, not portable.
 */
int
XR_MMap(addr, len, prot, flags, fildes, off)
XR_Pointer addr;
unsigned len;
int prot;
int flags;
XR_Fildes fildes;
unsigned off;
{
struct XR_MMArgsRep mma;
int ans, savedErrno;
if( fildes == XR_nullFildes ) {
XR_SetErrno(EBADF);
return (-1);
}
if( ((flags & (MAP_SHARED|MAP_FIXED)) != (MAP_SHARED|MAP_FIXED))
|| (!XR_ValidAddressRangeForMMap(addr, len)) ) {
XR_SetErrno(EINVAL);
return(-1);
}
(void)bzero( &mma, (sizeof mma) );
mma.mma_addr = addr;
mma.mma_len = len;
mma.mma_prot = prot;
mma.mma_flags = flags;
mma.mma_fd = ((unsigned)(-1)); /* filled in later from fdc */
mma.mma_off = off;
ans = XR_UIODoWithFDEAndFDC(
/*fildes*/ fildes,
/*waitReadyProc*/ NIL,
/*proc*/ XR_MMapWorker,
/*x1*/ ((unsigned)(&mma)),
/*x2*/ 0
);
if( ans != 0 ) {
/* undo any per-processor mappings that did succeed;
   MUnmap may clobber errno, so save it across the call */
savedErrno = XR_GetErrno();
(void) XR_MUnmap(addr, len);
XR_SetErrno(savedErrno);
return (-1);
}
return addr;
}
/* munmap primitives */
/* MUnmap accepts exactly the ranges MMap does (page-aligned addr). */
#define XR_ValidAddressRangeForMUnmap(addr,len) \
XR_ValidAddressRangeForMMap((addr), (len))
/*
 * Unmap [mma_addr, mma_addr+mma_len) on the calling VP or IOP.
 * Stores 0 or -errno in this processor's mma_results slot and
 * returns that value.
 */
static int /* -errno */
XR_DoMUnmapProc (mma)
XR_MMArgs mma;
{
    int rc;
    int status = 0;

#if DEBUG_MMAP
    XR_ConsoleMsg("MUnmapVPOrder addr 0x%x len %d\n",
        mma->mma_addr, mma->mma_len);
#endif
    rc = munmap( mma->mma_addr, mma->mma_len );
    if( rc != 0 ) {
        status = -errno;
#   if DEBUG_MMAP
        XR_ConsoleMsg("%? DoMUnmapProc ans 0x%x errno %d\n",
            rc, -status);
        XR_ConsoleMsg("addr 0x%x len %d\n", mma->mma_addr, mma->mma_len);
#   endif
    }
    mma->mma_results[ResultIndex()] = status;
    return status;
}
/* MUnmap VP Procs */
/*
 * MUnmap VP order proc: runs on each VP.
 */
void
XR_MUnmapVPOrderProc (vpo)
XR_VPOrder vpo;
{
    /* Must be executing on a VP, not an IOP. */
    if( XR_vpe == NIL ) XR_Panic("MUnmapVPOrderProc 0");
    (void) XR_DoMUnmapProc( (XR_MMArgs)(vpo->vpo_mma) );
}
/* MUnmap IOP Procs */
/*
 * MUnmap IOP order proc: unmaps locally in each IOP; IOP 0 also
 * forwards the order to all VPs.  A local failure (ans < 0) takes
 * precedence over the VPs' combined result in iopo_results[0].
 */
void
XR_MUnmapIOPOrderProc (iopo)
XR_IOPOrder iopo;
{
int ans, vpAns;
XR_MMArgs mma = ((XR_MMArgs)(iopo->iopo_mma));
ans = XR_DoMUnmapProc(mma);
vpAns = ( (XR_iope->iope_index == 0)
? XR_MemIOPIssueVPOrder(mma, XR_MUnmapVPOrderProc)
: 0 );
iopo->iopo_results[0] = ( (unsigned)((ans < 0) ? ans : vpAns) );
XR_UIONotifyIOPODone(iopo);
}
/* MUnmap Exported to UIO.h */
/*
 * Exported munmap analogue (see UIO.h).  Validates the range (same
 * page-alignment rule as MMap), then broadcasts the unmap to every
 * IOP and VP.  Returns 0, or -1 with errno set.
 */
int
XR_MUnmap(addr, len)
XR_Pointer addr;
unsigned len;
{
struct XR_MMArgsRep mma;
struct XR_IOPOrderRep iopo;
XR_IOPOResult iopoResult;
int ans;
if( !XR_ValidAddressRangeForMUnmap(addr, len) ) {
XR_SetErrno(EINVAL);
return(-1);
}
(void)bzero( &mma, (sizeof mma) );
mma.mma_addr = addr;
mma.mma_len = len;
iopo.iopo_mma = ((unsigned)(&mma));
iopoResult = XR_IssueIOPOrders(
/*order*/ &iopo,
/*proc*/ XR_MUnmapIOPOrderProc,
/*localProc*/ NIL
);
if( iopoResult != XR_IOPO_RESULT_OK ) {
XR_Panic("XR_MUnmap 0");
}
/* combined status from IOP 0: 0 or -errno */
ans = ((int)(iopo.iopo_results[0]));
if( ans < 0 ) {
XR_SetErrno(-ans);
return (-1);
}
return 0;
}
/* mprotect primitives */
/*
 * Sanity-check an (addr, len) range for MProtect: addr must be
 * page-aligned, and the range must not overlap the region claimed by
 * XR_OverlapsVDProtected (heap and adjacent areas -- see comment
 * below).
 *
 * Fix: removed the unused local `heapSeg` (warning in every build),
 * and corrected the "adjacenr" typo in the comment.
 */
static bool
XR_ValidAddressRangeForMProtect(addr, len)
XR_Pointer addr;
unsigned len;
{
    bool XR_OverlapsVDProtected(/* XR_Pointer addr, unsigned len */);

    if( addr != XR_ComputeAddress(addr, 0, XR_ROUND_DOWN) ) {
        /* not page-aligned */
        return FALSE;
    }
    /*
     * Make sure it's not in the heap or adjacent areas; the collector has
     * exclusive rights there.
     */
    return ( ! XR_OverlapsVDProtected(addr, len) );
}
/*
 * Apply mprotect(2) to [mma_addr, mma_addr+mma_len) with mma_prot on
 * the calling VP or IOP.  Stores 0 or -errno in this processor's
 * mma_results slot and returns that value.
 */
static int /* -errno */
XR_DoMProtectProc (mma)
XR_MMArgs mma;
{
int mprotectAns;
int theResult = 0;
#if DEBUG_MMAP
XR_ConsoleMsg("DoMProtectProc addr 0x%x len %d prot 0x%x\n",
mma->mma_addr, mma->mma_len, mma->mma_prot);
#endif
mprotectAns = mprotect(
mma->mma_addr,
mma->mma_len,
mma->mma_prot
);
if( mprotectAns != 0 ) {
theResult = -errno;
# if DEBUG_MMAP
XR_ConsoleMsg("%? DoMProtectProc mprotect ans 0x%x errno %d\n",
mprotectAns, -theResult);
XR_ConsoleMsg("addr 0x%x len %d prot 0x%x\n",
mma->mma_addr, mma->mma_len, mma->mma_prot);
# endif
}
mma->mma_results[ResultIndex()] = theResult;
return theResult;
}
/* MProtect VP Procs */
/*
 * MProtect VP order proc: runs on each VP.
 */
void
XR_MProtectVPOrderProc (vpo)
XR_VPOrder vpo;
{
    /* Must be executing on a VP, not an IOP. */
    if( XR_vpe == NIL ) XR_Panic("MProtectVPOrderProc 0");
    (void) XR_DoMProtectProc( (XR_MMArgs)(vpo->vpo_mma) );
}
/* MProtect IOP Procs */
/*
 * MProtect IOP order proc.  Honors mma_include_iops: when false, the
 * IOP skips its own mprotect and (if it is IOP 0) only relays the
 * order to the VPs.  Local failure takes precedence over the VPs'
 * combined result in iopo_results[0].
 */
static void
XR_MProtectIOPOrderProc (iopo)
XR_IOPOrder iopo;
{
int ans, vpAns;
XR_MMArgs mma = ((XR_MMArgs)(iopo->iopo_mma));
if (mma -> mma_include_iops) {
ans = XR_DoMProtectProc(mma);
} else {
ans = 0;
}
vpAns = ( (XR_iope->iope_index == 0)
? XR_MemIOPIssueVPOrder(mma, XR_MProtectVPOrderProc)
: 0 );
iopo->iopo_results[0] = ((unsigned)((ans < 0) ? ans : vpAns) );
XR_UIONotifyIOPODone(iopo);
}
/* A version of MProtect that allows heap addresses, does no checking on */
/* on addr, and allows caller to specify whether or not IOPs are included. */
/*
 * Change protection on [addr, addr+len) on every VP and, iff
 * include_iops, on the IOPs as well.  No validity checking is done on
 * addr, so heap addresses are allowed (unlike XR_MProtect, which
 * validates first).  Returns 0 on success, -1 with errno set on a
 * -errno result -- except in the single-VP/no-IOP shortcut, which
 * returns mprotect()'s raw result.
 */
int
XR_MProtect4(addr, len, prot, include_iops)
XR_Pointer addr;
unsigned len;
int prot;
bool include_iops;
{
struct XR_MMArgsRep mma;
struct XR_IOPOrderRep iopo;
XR_IOPOResult iopoResult;
XR_Pri oldPri;
XR_Pri highPri = XR_PRI_SYS_EXCLUSIVE;
int ans;
if (!include_iops && XR_sysArea->sa_numVP == 1) {
/* Just do it and don't bother the iop */
return(mprotect(addr, len, prot));
}
(void)bzero( &mma, (sizeof mma) );
mma.mma_addr = addr;
mma.mma_len = len;
mma.mma_prot = prot;
mma.mma_include_iops = include_iops;
iopo.iopo_mma = ((unsigned)(&mma));
/* Set my priority to a very high value. Otherwise I might not get */
/* immediately notified when this finishes. This will leave me not */
/* running, but holding IOP order locks. This may be better done */
/* in XR_IssueIOPOrders, but this is the case that causes problems. */
oldPri = XR_GetPriority();
XR_SchedCtl(
XR_SchedCtlWhichSelf(),
scop_setPriority,
((unsigned long *)(&highPri))
);
if (include_iops) {
/* broadcast: every IOP mprotects; IOP 0 relays to the VPs */
iopoResult = XR_IssueIOPOrders(
/*order*/ &iopo,
/*proc*/ XR_MProtectIOPOrderProc,
/*localProc*/ NIL
);
} else {
/* VPs only: single order to IOP 0, which relays to the VPs but
   (mma_include_iops false) skips its own mprotect */
iopoResult = XR_IssueIOPOrder(
/*iop*/ &(XR_sysArea->sa_iope[0]),
/*order*/ &iopo,
/*proc*/ XR_MProtectIOPOrderProc,
/*cancel*/ NIL,
/*abortable*/ FALSE,
/*timeout*/ XR_WAIT_FOREVER
);
}
/* restore the caller's priority before looking at the result */
XR_SchedCtl(
XR_SchedCtlWhichSelf(),
scop_setPriority,
((unsigned long *)(&oldPri))
);
if( iopoResult != XR_IOPO_RESULT_OK ) {
XR_Panic("XR_MProtect 0");
}
ans = ((int)(iopo.iopo_results[0]));
if( ans < 0 ) {
XR_SetErrno(-ans);
return (-1);
}
return 0;
}
/* MProtect Exported to UIO.h */
/*
 * MProtect exported to UIO.h.  Validates the address range (page
 * aligned, outside the collector-protected region), then delegates to
 * XR_MProtect4 with IOPs included.  Returns 0 on success, or -1 with
 * errno set (EINVAL for a bad range).
 *
 * Fix: removed four unused locals (mma, iopo, iopoResult, ans) left
 * over from before the body was delegated to XR_MProtect4.
 */
int
XR_MProtect(addr, len, prot)
XR_Pointer addr;
unsigned len;
int prot;
{
    if( !XR_ValidAddressRangeForMProtect(addr, len) ) {
        XR_SetErrno(EINVAL);
        return(-1);
    }
    return (XR_MProtect4(addr, len, prot, TRUE));
}
/*
* MCtl
*/
#define mma_function mma_prot
#define mma_arg mma_flags
/*
 * Apply mctl(2) to [mma_addr, mma_addr+mma_len) on the calling VP or
 * IOP.  mma_function / mma_arg are macro aliases for mma_prot /
 * mma_flags (see the #defines above).  Stores 0 or -errno in this
 * processor's mma_results slot and returns that value.
 */
static int /* -errno */
XR_DoMCtlProc (mma)
XR_MMArgs mma;
{
int mctlAns;
int theResult = 0;
#if DEBUG_MMAP
XR_ConsoleMsg("DoMCtlProc addr 0x%x len %d fcn 0x%x arg 0x%x\n",
mma->mma_addr, mma->mma_len, mma->mma_function, mma->mma_arg);
#endif
mctlAns = mctl(
mma->mma_addr,
mma->mma_len,
mma->mma_function,
mma->mma_arg
);
if( mctlAns < 0 ) {
theResult = -errno;
# if DEBUG_MMAP
XR_ConsoleMsg("%? DoMCtlProc mctl ans 0x%x errno %d\n",
mctlAns, -theResult);
XR_ConsoleMsg("addr 0x%x len %d fcn 0x%x arg 0x%x\n",
mma->mma_addr, mma->mma_len, mma->mma_function, mma->mma_arg);
# endif
}
mma->mma_results[ResultIndex()] = theResult;
return theResult;
}
/*
 * MCtl VP order proc: runs on each VP.
 */
void
XR_MCtlVPOrderProc (vpo)
XR_VPOrder vpo;
{
    /* Must be executing on a VP, not an IOP. */
    if( XR_vpe == NIL ) XR_Panic("MCtlVPOrderProc 0");
    (void) XR_DoMCtlProc( (XR_MMArgs)(vpo->vpo_mma) );
}
/*
 * MCtl IOP order proc.  Honors mma_include_iops: when false, the IOP
 * skips its own mctl and (if it is IOP 0) only relays the order to
 * the VPs.  Local failure takes precedence over the VPs' combined
 * result in iopo_results[0].
 */
static void
XR_MCtlIOPOrderProc (iopo)
XR_IOPOrder iopo;
{
int ans, vpAns;
XR_MMArgs mma = ((XR_MMArgs)(iopo->iopo_mma));
if (mma -> mma_include_iops) {
ans = XR_DoMCtlProc(mma);
} else {
ans = 0;
}
vpAns = ( (XR_iope->iope_index == 0)
? XR_MemIOPIssueVPOrder(mma, XR_MCtlVPOrderProc)
: 0 );
iopo->iopo_results[0] = ((unsigned)((ans < 0) ? ans : vpAns) );
XR_UIONotifyIOPODone(iopo);
}
/*
 * Apply mctl(2) with (function, arg) to [addr, addr+len) on every VP
 * and, iff include_iops, on the IOPs as well.  No validity checking
 * is done on addr.  Returns 0 on success, or -1 with errno set.
 *
 * Fix: the XR_Panic message said "XR_MProtect 0" -- a copy/paste slip
 * from the MProtect path -- so a panic here blamed the wrong routine.
 * It now names this function.
 */
int
XR_MCtl5(addr, len, function, arg, include_iops)
XR_Pointer addr;
unsigned len;
int function;
XR_Pointer arg;
bool include_iops;
{
    struct XR_MMArgsRep mma;
    struct XR_IOPOrderRep iopo;
    XR_IOPOResult iopoResult;
    int ans;

    (void)bzero( &mma, (sizeof mma) );
    mma.mma_addr = addr;
    mma.mma_len = len;
    mma.mma_function = function; /* alias for mma_prot */
    mma.mma_arg = arg;           /* alias for mma_flags */
    mma.mma_include_iops = include_iops;
    iopo.iopo_mma = ((unsigned)(&mma));
    iopoResult = XR_IssueIOPOrders(
        /*order*/ &iopo,
        /*proc*/ XR_MCtlIOPOrderProc,
        /*localProc*/ NIL
    );
    if( iopoResult != XR_IOPO_RESULT_OK ) {
        XR_Panic("XR_MCtl5 0");
    }
    ans = ((int)(iopo.iopo_results[0]));
    if( ans < 0 ) {
        XR_SetErrno(-ans);
        return (-1);
    }
    return 0;
}
int
XR_MCtl(addr, len, function, arg)
XR_Pointer addr;
unsigned len;
int function;
XR_Pointer arg;
{
return (XR_MCtl5(addr, len, function, arg, TRUE));
}
#undef mma_function
#undef mma_arg
/*
* int msync(XR_Pointer addr, int len, int flags)
*/
/*
 * Exported msync analogue: a plain pass-through to msync(2) -- no
 * VP/IOP broadcast is performed for this operation.
 */
int
XR_MSync(addr, len, flags)
XR_Pointer addr;
int len;
int flags;
{
    int ans = msync(addr, len, flags);
    return ans;
}